[IA64] Enable SMP on VTI-Domain
author awilliam@xenbuild.aw <awilliam@xenbuild.aw>
Thu, 8 Jun 2006 17:00:09 +0000 (11:00 -0600)
committer awilliam@xenbuild.aw <awilliam@xenbuild.aw>
Thu, 8 Jun 2006 17:00:09 +0000 (11:00 -0600)
Signed-off-by: Anthony Xu <anthony.xu@intel.com>
xen/arch/ia64/vmx/mmio.c
xen/arch/ia64/vmx/vlsapic.c
xen/arch/ia64/vmx/vmmu.c
xen/arch/ia64/vmx/vmx_support.c
xen/arch/ia64/vmx/vmx_virt.c
xen/arch/ia64/vmx/vtlb.c
xen/arch/ia64/xen/xentime.c
xen/include/asm-ia64/vmx.h

index 16588c60d06591447a1e203a70ff0b69339e47f8..f94e0f4f5eb6776d4b38e411759a3bad366a07ad 100644 (file)
@@ -33,8 +33,9 @@
 #include <asm/mm.h>
 #include <asm/vmx.h>
 #include <public/event_channel.h>
+#include <public/arch-ia64.h>
 #include <linux/event.h>
-
+#include <xen/domain.h>
 /*
 struct mmio_list *lookup_mmio(u64 gpa, struct mmio_list *mio_base)
 {
@@ -51,7 +52,7 @@ struct mmio_list *lookup_mmio(u64 gpa, struct mmio_list *mio_base)
 #define PIB_OFST_INTA           0x1E0000
 #define PIB_OFST_XTP            0x1E0008
 
-static int write_ipi (VCPU *vcpu, uint64_t addr, uint64_t value);
+static void write_ipi (VCPU *vcpu, uint64_t addr, uint64_t value);
 
 static void pib_write(VCPU *vcpu, void *src, uint64_t pib_off, size_t s, int ma)
 {
@@ -356,42 +357,67 @@ static void deliver_ipi (VCPU *vcpu, uint64_t dm, uint64_t vector)
  */
 static inline VCPU *lid_2_vcpu (struct domain *d, u64 id, u64 eid)
 {
-       int   i;
-       VCPU  *vcpu;
-       LID       lid;
-       for (i=0; i<MAX_VIRT_CPUS; i++) {
-               vcpu = d->vcpu[i];
-               if (!vcpu)
-                       continue;
-               lid.val = VCPU(vcpu, lid);
-               if ( lid.id == id && lid.eid == eid ) {
-                   return vcpu;
-               }
-       }
-       return NULL;
+    int   i;
+    VCPU  *vcpu;
+    LID   lid;
+    for (i=0; i<MAX_VIRT_CPUS; i++) {
+        vcpu = d->vcpu[i];
+        if (!vcpu)
+            continue;
+        lid.val = VCPU_LID(vcpu);
+        if ( lid.id == id && lid.eid == eid )
+            return vcpu;
+    }
+    return NULL;
 }
 
 /*
  * execute write IPI op.
  */
-static int write_ipi (VCPU *vcpu, uint64_t addr, uint64_t value)
+static void write_ipi (VCPU *vcpu, uint64_t addr, uint64_t value)
 {
-    VCPU   *target_cpu;
-    target_cpu = lid_2_vcpu(vcpu->domain, 
-                               ((ipi_a_t)addr).id, ((ipi_a_t)addr).eid);
-    if ( target_cpu == NULL ) panic_domain (NULL,"Unknown IPI cpu\n");
-    if ( target_cpu == vcpu ) {
-       // IPI to self
-        deliver_ipi (vcpu, ((ipi_d_t)value).dm, 
-                ((ipi_d_t)value).vector);
-        return 1;
+    VCPU   *targ;
+    struct domain *d=vcpu->domain; 
+    targ = lid_2_vcpu(vcpu->domain, 
+           ((ipi_a_t)addr).id, ((ipi_a_t)addr).eid);
+    if ( targ == NULL ) panic_domain (NULL,"Unknown IPI cpu\n");
+
+    if (!test_bit(_VCPUF_initialised, &targ->vcpu_flags)) {
+        struct pt_regs *targ_regs = vcpu_regs (targ);
+        struct vcpu_guest_context c;
+
+        printf ("arch_boot_vcpu: %p %p\n",
+                (void *)d->arch.boot_rdv_ip,
+                (void *)d->arch.boot_rdv_r1);
+        memset (&c, 0, sizeof (c));
+
+        c.flags = VGCF_VMX_GUEST;
+        if (arch_set_info_guest (targ, &c) != 0) {
+            printf ("arch_boot_vcpu: failure\n");
+            return;
+        }
+        /* First or next rendez-vous: set registers.  */
+        vcpu_init_regs (targ);
+        targ_regs->cr_iip = d->arch.boot_rdv_ip;
+        targ_regs->r1 = d->arch.boot_rdv_r1;
+
+        if (test_and_clear_bit(_VCPUF_down,&targ->vcpu_flags)) {
+            vcpu_wake(targ);
+            printf ("arch_boot_vcpu: vcpu %d awaken %016lx!\n",
+                    targ->vcpu_id, targ_regs->cr_iip);
+        }
+        else
+            printf ("arch_boot_vcpu: huu, already awaken!");
     }
     else {
-       // TODO: send Host IPI to inject guest SMP IPI interruption
-        panic_domain (NULL, "No SM-VP supported!\n");
-        return 0;
+        int running = test_bit(_VCPUF_running,&targ->vcpu_flags);
+        deliver_ipi (targ, ((ipi_d_t)value).dm, 
+                    ((ipi_d_t)value).vector);
+        vcpu_unblock(targ);
+        if (running)
+            smp_send_event_check_cpu(targ->processor);
     }
+    return;
 }
 
 
index f0d5c45f8481080aa6c2122688056c33848ac181..d519cdafa0ef382f484bc42dba6566e86150a028 100644 (file)
@@ -362,7 +362,7 @@ void vlsapic_reset(VCPU *vcpu)
 {
     int     i;
 
-    VCPU(vcpu, lid) = ia64_getreg(_IA64_REG_CR_LID);
+    VCPU(vcpu, lid) = VCPU_LID(vcpu);
     VCPU(vcpu, ivr) = 0;
     VCPU(vcpu,tpr) = 0x10000;
     VCPU(vcpu, eoi) = 0;
index 31a52152f1be42fea7babbc2469c4f9518034b79..49b289707cd71e9dda80662cc5a521cbc5b3f15f 100644 (file)
@@ -492,15 +492,66 @@ IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UINT64 va)
 
 IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, UINT64 va, UINT64 ps)
 {
-    vmx_vcpu_ptc_l(vcpu, va, ps);
+    vmx_vcpu_ptc_ga(vcpu, va, ps);
     return IA64_ILLOP_FAULT;
 }
-
+/*
 IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu,UINT64 va,UINT64 ps)
 {
     vmx_vcpu_ptc_l(vcpu, va, ps);
     return IA64_NO_FAULT;
 }
+ */
+struct ptc_ga_args {
+    unsigned long vadr;
+    unsigned long rid;
+    unsigned long ps;
+    struct vcpu *vcpu;
+};
+
+static void ptc_ga_remote_func (void *varg)
+{
+    u64 oldrid, moldrid;
+    VCPU *v;
+    struct ptc_ga_args *args = (struct ptc_ga_args *)varg;
+    v = args->vcpu;
+    oldrid = VMX(v, vrr[0]);
+    VMX(v, vrr[0]) = args->rid;
+    moldrid = ia64_get_rr(0x0);
+    ia64_set_rr(0x0,vrrtomrr(v,args->rid));
+    vmx_vcpu_ptc_l(v, args->vadr, args->ps);
+    VMX(v, vrr[0]) = oldrid; 
+    ia64_set_rr(0x0,moldrid);
+}
+
+
+IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu,UINT64 va,UINT64 ps)
+{
+
+    struct domain *d = vcpu->domain;
+    struct vcpu *v;
+    struct ptc_ga_args args;
+
+    args.vadr = va<<3>>3;
+    vcpu_get_rr(vcpu, va, &args.rid);
+    args.ps = ps;
+    for_each_vcpu (d, v) {
+        args.vcpu = v;
+        if (v->processor != vcpu->processor) {
+            int proc;
+            /* Flush VHPT on remote processors.  */
+            do {
+                proc = v->processor;
+                smp_call_function_single(v->processor, 
+                    &ptc_ga_remote_func, &args, 0, 1);
+                /* Try again if VCPU has migrated.  */
+            } while (proc != v->processor);
+        }
+        else
+            ptc_ga_remote_func(&args);
+    }
+    return IA64_NO_FAULT;
+}
 
 
 IA64FAULT vmx_vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
index f4f0a89bf15aba97079ca7357b5d88753c3381de..2be02a816e3c916bd3f50aad515194bf6cecc0e7 100644 (file)
@@ -138,7 +138,8 @@ void vmx_intr_assist(struct vcpu *v)
 
 #ifdef V_IOSAPIC_READY
     /* Confirm virtual interrupt line signals, and set pending bits in vpd */
-    vmx_virq_line_assist(v);
+    if(v->vcpu_id==0)
+        vmx_virq_line_assist(v);
 #endif
     return;
 }
index b5682d9a8b0877fe1ab5b79c260955ff3e0f374c..7b2b1b613237f3cfa26c98bb61c1734fd3856fb8 100644 (file)
@@ -317,12 +317,68 @@ IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INST64 inst)
 
 IA64FAULT vmx_emul_ptc_g(VCPU *vcpu, INST64 inst)
 {
-    return vmx_emul_ptc_l(vcpu, inst);
+    u64 r2,r3;
+#ifdef  VMAL_NO_FAULT_CHECK    
+    IA64_PSR  vpsr;
+    vpsr.val=vmx_vcpu_get_psr(vcpu);
+    if ( vpsr.cpl != 0) {
+        /* Inject Privileged Operation fault into guest */
+        set_privileged_operation_isr (vcpu, 0);
+        privilege_op (vcpu);
+        return IA64_FAULT;
+    }
+#endif // VMAL_NO_FAULT_CHECK    
+    if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
+#ifdef  VMAL_NO_FAULT_CHECK
+        ISR isr;
+        set_isr_reg_nat_consumption(vcpu,0,0);
+        rnat_comsumption(vcpu);
+        return IA64_FAULT;
+#endif // VMAL_NO_FAULT_CHECK
+    }
+#ifdef  VMAL_NO_FAULT_CHECK
+    if (unimplemented_gva(vcpu,r3) ) {
+        isr.val = set_isr_ei_ni(vcpu);
+        isr.code = IA64_RESERVED_REG_FAULT;
+        vcpu_set_isr(vcpu, isr.val);
+        unimpl_daddr(vcpu);
+        return IA64_FAULT;
+   }
+#endif // VMAL_NO_FAULT_CHECK
+    return vmx_vcpu_ptc_g(vcpu,r3,bits(r2,2,7));
 }
 
 IA64FAULT vmx_emul_ptc_ga(VCPU *vcpu, INST64 inst)
 {
-    return vmx_emul_ptc_l(vcpu, inst);
+    u64 r2,r3;
+#ifdef  VMAL_NO_FAULT_CHECK    
+    IA64_PSR  vpsr;
+    vpsr.val=vmx_vcpu_get_psr(vcpu);
+    if ( vpsr.cpl != 0) {
+        /* Inject Privileged Operation fault into guest */
+        set_privileged_operation_isr (vcpu, 0);
+        privilege_op (vcpu);
+        return IA64_FAULT;
+    }
+#endif // VMAL_NO_FAULT_CHECK    
+    if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
+#ifdef  VMAL_NO_FAULT_CHECK
+        ISR isr;
+        set_isr_reg_nat_consumption(vcpu,0,0);
+        rnat_comsumption(vcpu);
+        return IA64_FAULT;
+#endif // VMAL_NO_FAULT_CHECK
+    }
+#ifdef  VMAL_NO_FAULT_CHECK
+    if (unimplemented_gva(vcpu,r3) ) {
+        isr.val = set_isr_ei_ni(vcpu);
+        isr.code = IA64_RESERVED_REG_FAULT;
+        vcpu_set_isr(vcpu, isr.val);
+        unimpl_daddr(vcpu);
+        return IA64_FAULT;
+   }
+#endif // VMAL_NO_FAULT_CHECK
+    return vmx_vcpu_ptc_ga(vcpu,r3,bits(r2,2,7));
 }
 
 IA64FAULT ptr_fault_check(VCPU *vcpu, INST64 inst, u64 *pr2, u64 *pr3)
@@ -1191,7 +1247,6 @@ IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, INST64 inst)
     }
 #endif  //CHECK_FAULT
     r2 = cr_igfld_mask(inst.M32.cr3,r2);
-    VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
     switch (inst.M32.cr3) {
         case 0: return vmx_vcpu_set_dcr(vcpu,r2);
         case 1: return vmx_vcpu_set_itm(vcpu,r2);
@@ -1207,7 +1262,7 @@ IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, INST64 inst)
         case 24:return vcpu_set_iim(vcpu,r2);
         case 25:return vcpu_set_iha(vcpu,r2);
         case 64:printk("SET LID to 0x%lx\n", r2);
-               return vmx_vcpu_set_lid(vcpu,r2);
+                return IA64_NO_FAULT;
         case 65:return IA64_NO_FAULT;
         case 66:return vmx_vcpu_set_tpr(vcpu,r2);
         case 67:return vmx_vcpu_set_eoi(vcpu,r2);
@@ -1220,7 +1275,8 @@ IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, INST64 inst)
         case 74:return vmx_vcpu_set_cmcv(vcpu,r2);
         case 80:return vmx_vcpu_set_lrr0(vcpu,r2);
         case 81:return vmx_vcpu_set_lrr1(vcpu,r2);
-        default: return IA64_NO_FAULT;
+        default:VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
+                return IA64_NO_FAULT;
     }
 }
 
index 21cce702b744f8352ab59a2a3be7f4f97765fc2c..57466da8c744fd8ae44c380cfb3283763efab4d2 100644 (file)
@@ -58,12 +58,6 @@ static thash_data_t *cch_alloc(thash_cb_t *hcb)
     return p;
 }
 
-static void cch_free(thash_cb_t *hcb, thash_data_t *cch)
-{
-    cch->next = hcb->cch_freelist;
-    hcb->cch_freelist = cch;
-}
-
 /*
  * Check to see if the address rid:va is translated by the TLB
  */
@@ -94,22 +88,6 @@ __is_tr_overlap(thash_data_t *trp, u64 rid, u64 sva, u64 eva)
 
 }
 
-/*
- * Delete an thash entry leading collision chain.
- */
-static void __rem_hash_head(thash_cb_t *hcb, thash_data_t *hash)
-{
-    thash_data_t *next=hash->next;
-    if ( next) {
-        next->len=hash->len-1;
-        *hash = *next;
-        cch_free (hcb, next);
-    }
-    else {
-        hash->ti=1;
-    }
-}
-
 thash_data_t *__vtr_lookup(VCPU *vcpu, u64 va, int is_data)
 {
 
@@ -142,17 +120,18 @@ thash_data_t *__vtr_lookup(VCPU *vcpu, u64 va, int is_data)
 
 static void thash_recycle_cch(thash_cb_t *hcb, thash_data_t *hash)
 {
-    thash_data_t *p;
+    thash_data_t *p, *q;
     int i=0;
     
     p=hash;
     for(i=0; i < MAX_CCN_DEPTH; i++){
         p=p->next;
     }
-    p->next=hcb->cch_freelist;
-    hcb->cch_freelist=hash->next;
+    q=hash->next;
     hash->len=0;
     hash->next=0;
+    p->next=hcb->cch_freelist;
+    hcb->cch_freelist=q;
 }
 
 
@@ -265,16 +244,14 @@ static void vtlb_purge(thash_cb_t *hcb, u64 va, u64 ps)
         hash_table = vsa_thash(hcb->pta, start, vrr.rrval, &tag);
         if(!INVALID_TLB(hash_table)){
             if(hash_table->etag == tag){
-                __rem_hash_head(hcb, hash_table);
+                 hash_table->etag = 1UL<<63;
             }
             else{
                 prev=hash_table;
                 next=prev->next;
                 while(next){
                     if(next->etag == tag){
-                        prev->next=next->next;
-                        cch_free(hcb,next);
-                        hash_table->len--;
+                        next->etag = 1UL<<63;
                         break;
                     }
                     prev=next;
@@ -300,16 +277,14 @@ static void vhpt_purge(thash_cb_t *hcb, u64 va, u64 ps)
         hash_table = (thash_data_t *)ia64_thash(start);
         tag = ia64_ttag(start);
         if(hash_table->etag == tag ){
-            __rem_hash_head(hcb, hash_table);
+            hash_table->etag = 1UL<<63; 
         }
         else{
             prev=hash_table;
             next=prev->next;
             while(next){
                 if(next->etag == tag){
-                    prev->next=next->next;
-                    cch_free(hcb,next);
-                    hash_table->len--;
+                    next->etag = 1UL<<63;
                     break; 
                 }
                 prev=next;
@@ -383,7 +358,6 @@ void vtlb_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 va)
         hash_table->page_flags = pte;
         hash_table->itir=itir;
         hash_table->etag=tag;
-        hash_table->next = 0;
         return;
     }
     if (hash_table->len>=MAX_CCN_DEPTH){
@@ -539,7 +513,6 @@ void thash_purge_all(VCPU *v)
         num--;
     }while(num);
     cch_mem_init(vhpt);
-
     local_flush_tlb_all();
 }
 
index c997b5ad1e5a7758d05130b288dcb87aa015a84a..b1d32c50aeb9770b584dbfc5cbb33c811d44c7bc 100644 (file)
@@ -124,7 +124,7 @@ xen_timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
 #endif
 #endif
 
-       if (!is_idle_domain(current->domain))
+       if (!is_idle_domain(current->domain)&&!VMX_DOMAIN(current))
                if (vcpu_timer_expired(current)) {
                        vcpu_pend_timer(current);
                        // ensure another timer interrupt happens even if domain doesn't
index db33b968790e5cd453332ff4ccec87da94b465c6..e25e471225a3c279f3c9e4befdf81c2cbd178373 100644 (file)
@@ -25,6 +25,8 @@
 #define RR7_SWITCH_SHIFT       12      /* 4k enough */
 #include <public/hvm/ioreq.h>
 
+#define VCPU_LID(v) (((u64)(v)->vcpu_id)<<24)
+
 extern void identify_vmx_feature(void);
 extern unsigned int vmx_enabled;
 extern void vmx_init_env(void);